In [2]:
import tensorflow as tf
from tensorflow.keras import models, layers
import matplotlib.pyplot as plt
from IPython.display import HTML
%matplotlib inline
In [3]:
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
In [8]:
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    "POTATO",
    seed=123,
    shuffle=True,
    image_size=(IMAGE_SIZE,IMAGE_SIZE),
    batch_size=BATCH_SIZE
)
Found 1500 files belonging to 3 classes.
In [11]:
class_names = dataset.class_names
class_names
Out[11]:
['Test', 'Train', 'Valid']
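Note that `image_dataset_from_directory` treats each immediate subfolder as a class, so the labels here are the dataset's split folders rather than disease categories. A minimal sketch of loading one split per call, assuming (hypothetically) that the disease-class subfolders live inside each split folder:

train_raw = tf.keras.preprocessing.image_dataset_from_directory(
    "POTATO/Train",  # hypothetical path: one split folder per call
    seed=123,
    shuffle=True,
    image_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=BATCH_SIZE,
)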
In [12]:
for image_batch, labels_batch in dataset.take(1):
    print(image_batch.shape)
    print(labels_batch.numpy())
(32, 256, 256, 3)
[0 1 1 2 1 1 1 2 1 0 1 1 1 0 0 1 0 2 1 1 0 1 0 1 1 2 1 2 1 1 2 0]
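The integer labels index into `class_names`; decoding the first few entries of this batch (a quick check, reusing the loop variables above):

# [0, 1, 1, 2, 1] -> ['Test', 'Train', 'Train', 'Valid', 'Train']
print([class_names[i] for i in labels_batch.numpy()[:5]])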
In [19]:
plt.figure(figsize=(10, 10))
for image_batch, labels_batch in dataset.take(1):
    for i in range(12):
        ax = plt.subplot(3, 4, i + 1)
        plt.imshow(image_batch[i].numpy().astype("uint8"))
        plt.title(class_names[labels_batch[i]])
        plt.axis("off")
In [8]:
len(dataset)
Out[8]:
47
In [9]:
train_size = 0.8
len(dataset)*train_size
Out[9]:
37.6
In [10]:
train_ds = dataset.take(37)
len(train_ds)
Out[10]:
37
In [11]:
test_ds = dataset.skip(37)
len(test_ds)
Out[11]:
10
In [12]:
val_size = 0.1
len(dataset)*val_size
Out[12]:
4.7
In [13]:
int(len(dataset)*val_size)
Out[13]:
4
In [14]:
test_ds = test_ds.skip(count=4)
len(test_ds)
Out[14]:
6
In [15]:
def get_dataset_partitions_tf(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000):
    assert (train_split + test_split + val_split) == 1
    
    ds_size = len(ds)
    
    if shuffle:
        ds = ds.shuffle(shuffle_size, seed=12)
    
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    
    train_ds = ds.take(train_size)    
    val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size).skip(val_size)
    
    return train_ds, val_ds, test_ds
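For this dataset of 47 batches, the split arithmetic matches the lengths printed below:

# train: int(0.8 * 47) = 37 batches
# val:   int(0.1 * 47) = 4 batches
# test:  47 - 37 - 4   = 6 batches (whatever remains after skipping train and val)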
In [16]:
train_ds, val_ds, test_ds = get_dataset_partitions_tf(dataset)
In [17]:
len(train_ds)
Out[17]:
37
In [18]:
len(val_ds)
Out[18]:
4
In [20]:
len(test_ds)
Out[20]:
6
In [21]:
# Cache decoded batches, reshuffle each epoch, and prefetch to overlap input prep with training
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
In [22]:
resize_and_rescale = tf.keras.Sequential([
  layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
  layers.Rescaling(1./255),
])
WARNING:tensorflow:From C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\backend.py:873: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.

In [23]:
data_augmentation = tf.keras.Sequential([
  layers.RandomFlip("horizontal_and_vertical"),
  layers.RandomRotation(0.2),
])
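As a quick sanity check (a sketch, reusing the objects above), the augmentation can be applied to one batch and displayed:

for images, _ in train_ds.take(1):
    augmented = data_augmentation(images, training=True)
    plt.imshow(augmented[0].numpy().astype("uint8"))
    plt.axis("off")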
In [24]:
train_ds = train_ds.map(
    lambda x, y: (data_augmentation(x, training=True), y)
).prefetch(buffer_size=tf.data.AUTOTUNE)
In [25]:
input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS)  # batch dim included for model.build()
n_classes = 3

model = models.Sequential([
    resize_and_rescale,
    layers.Conv2D(32, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax'),
])

model.build(input_shape=input_shape)
WARNING:tensorflow:From C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\layers\pooling\max_pooling2d.py:161: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.
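The shapes in the summary below follow from unpadded 3x3 convolutions and 2x2 pooling; the first block works out as:

# Conv2D 3x3, padding='valid': 256 - 3 + 1 = 254  -> (254, 254, 32)
# MaxPooling2D 2x2:            254 // 2 = 127     -> (127, 127, 32)
# ...and so on down to (2, 2, 64), i.e. 2*2*64 = 256 features after Flatten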

In [26]:
model.summary()
Model: "sequential_2"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 sequential (Sequential)     (32, 256, 256, 3)         0         
                                                                 
 conv2d (Conv2D)             (32, 254, 254, 32)        896       
                                                                 
 max_pooling2d (MaxPooling2  (32, 127, 127, 32)        0         
 D)                                                              
                                                                 
 conv2d_1 (Conv2D)           (32, 125, 125, 64)        18496     
                                                                 
 max_pooling2d_1 (MaxPoolin  (32, 62, 62, 64)          0         
 g2D)                                                            
                                                                 
 conv2d_2 (Conv2D)           (32, 60, 60, 64)          36928     
                                                                 
 max_pooling2d_2 (MaxPoolin  (32, 30, 30, 64)          0         
 g2D)                                                            
                                                                 
 conv2d_3 (Conv2D)           (32, 28, 28, 64)          36928     
                                                                 
 max_pooling2d_3 (MaxPoolin  (32, 14, 14, 64)          0         
 g2D)                                                            
                                                                 
 conv2d_4 (Conv2D)           (32, 12, 12, 64)          36928     
                                                                 
 max_pooling2d_4 (MaxPoolin  (32, 6, 6, 64)            0         
 g2D)                                                            
                                                                 
 conv2d_5 (Conv2D)           (32, 4, 4, 64)            36928     
                                                                 
 max_pooling2d_5 (MaxPoolin  (32, 2, 2, 64)            0         
 g2D)                                                            
                                                                 
 flatten (Flatten)           (32, 256)                 0         
                                                                 
 dense (Dense)               (32, 64)                  16448     
                                                                 
 dense_1 (Dense)             (32, 3)                   195       
                                                                 
=================================================================
Total params: 183747 (717.76 KB)
Trainable params: 183747 (717.76 KB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
In [27]:
model.compile(
    optimizer='adam',
    # integer labels -> sparse CE; softmax output -> probabilities, not logits
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['accuracy']
)
WARNING:tensorflow:From C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\optimizers\__init__.py:309: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.

In [28]:
history = model.fit(
    train_ds,          # already batched by the tf.data pipeline, so no batch_size here
    validation_data=val_ds,
    verbose=1,
    epochs=EPOCHS,
)
Epoch 1/50
WARNING:tensorflow:From C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\utils\tf_utils.py:492: The name tf.ragged.RaggedTensorValue is deprecated. Please use tf.compat.v1.ragged.RaggedTensorValue instead.

WARNING:tensorflow:From C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\engine\base_layer_utils.py:384: The name tf.executing_eagerly_outside_functions is deprecated. Please use tf.compat.v1.executing_eagerly_outside_functions instead.

37/37 [==============================] - 45s 1s/step - loss: 0.9697 - accuracy: 0.6068 - val_loss: 0.9590 - val_accuracy: 0.5938
Epoch 2/50
37/37 [==============================] - 39s 1s/step - loss: 0.9472 - accuracy: 0.6068 - val_loss: 0.9565 - val_accuracy: 0.5938
Epoch 3/50
37/37 [==============================] - 38s 1s/step - loss: 0.9499 - accuracy: 0.6068 - val_loss: 0.9671 - val_accuracy: 0.5938
Epoch 4/50
37/37 [==============================] - 38s 1s/step - loss: 0.9568 - accuracy: 0.6068 - val_loss: 0.9561 - val_accuracy: 0.5938
Epoch 5/50
37/37 [==============================] - 38s 1s/step - loss: 0.9510 - accuracy: 0.6068 - val_loss: 0.9568 - val_accuracy: 0.5938
Epoch 6/50
37/37 [==============================] - 37s 1s/step - loss: 0.9464 - accuracy: 0.6068 - val_loss: 0.9631 - val_accuracy: 0.5938
Epoch 7/50
37/37 [==============================] - 38s 1s/step - loss: 0.9465 - accuracy: 0.6068 - val_loss: 0.9560 - val_accuracy: 0.5938
Epoch 8/50
37/37 [==============================] - 37s 990ms/step - loss: 0.9457 - accuracy: 0.6068 - val_loss: 0.9567 - val_accuracy: 0.5938
Epoch 9/50
37/37 [==============================] - 37s 996ms/step - loss: 0.9450 - accuracy: 0.6068 - val_loss: 0.9561 - val_accuracy: 0.5938
Epoch 10/50
37/37 [==============================] - 37s 1s/step - loss: 0.9459 - accuracy: 0.6068 - val_loss: 0.9562 - val_accuracy: 0.5938
Epoch 11/50
37/37 [==============================] - 37s 986ms/step - loss: 0.9453 - accuracy: 0.6068 - val_loss: 0.9562 - val_accuracy: 0.5938
Epoch 12/50
37/37 [==============================] - 38s 1s/step - loss: 0.9443 - accuracy: 0.6068 - val_loss: 0.9563 - val_accuracy: 0.5938
Epoch 13/50
37/37 [==============================] - 38s 1s/step - loss: 0.9496 - accuracy: 0.6068 - val_loss: 0.9569 - val_accuracy: 0.5938
Epoch 14/50
37/37 [==============================] - 39s 1s/step - loss: 0.9446 - accuracy: 0.6068 - val_loss: 0.9565 - val_accuracy: 0.5938
Epoch 15/50
37/37 [==============================] - 38s 1s/step - loss: 0.9422 - accuracy: 0.6068 - val_loss: 0.9604 - val_accuracy: 0.5938
Epoch 16/50
37/37 [==============================] - 39s 1s/step - loss: 0.9437 - accuracy: 0.6068 - val_loss: 0.9583 - val_accuracy: 0.5938
Epoch 17/50
37/37 [==============================] - 38s 1s/step - loss: 0.9432 - accuracy: 0.6068 - val_loss: 0.9561 - val_accuracy: 0.5938
Epoch 18/50
37/37 [==============================] - 38s 1s/step - loss: 0.9456 - accuracy: 0.6068 - val_loss: 0.9562 - val_accuracy: 0.5938
Epoch 19/50
37/37 [==============================] - 38s 1s/step - loss: 0.9434 - accuracy: 0.6068 - val_loss: 0.9576 - val_accuracy: 0.5938
Epoch 20/50
37/37 [==============================] - 39s 1s/step - loss: 0.9476 - accuracy: 0.6068 - val_loss: 0.9615 - val_accuracy: 0.5938
Epoch 21/50
37/37 [==============================] - 38s 1s/step - loss: 0.9449 - accuracy: 0.6068 - val_loss: 0.9564 - val_accuracy: 0.5938
Epoch 22/50
37/37 [==============================] - 37s 1000ms/step - loss: 0.9436 - accuracy: 0.6068 - val_loss: 0.9600 - val_accuracy: 0.5938
Epoch 23/50
37/37 [==============================] - 38s 1s/step - loss: 0.9451 - accuracy: 0.6068 - val_loss: 0.9563 - val_accuracy: 0.5938
Epoch 24/50
37/37 [==============================] - 38s 1s/step - loss: 0.9433 - accuracy: 0.6068 - val_loss: 0.9589 - val_accuracy: 0.5938
Epoch 25/50
37/37 [==============================] - 39s 1s/step - loss: 0.9435 - accuracy: 0.6068 - val_loss: 0.9562 - val_accuracy: 0.5938
Epoch 26/50
37/37 [==============================] - 39s 1s/step - loss: 0.9421 - accuracy: 0.6068 - val_loss: 0.9622 - val_accuracy: 0.5938
Epoch 27/50
37/37 [==============================] - 39s 1s/step - loss: 0.9451 - accuracy: 0.6068 - val_loss: 0.9562 - val_accuracy: 0.5938
Epoch 28/50
37/37 [==============================] - 38s 1s/step - loss: 0.9427 - accuracy: 0.6068 - val_loss: 0.9589 - val_accuracy: 0.5938
Epoch 29/50
37/37 [==============================] - 39s 1s/step - loss: 0.9451 - accuracy: 0.6068 - val_loss: 0.9571 - val_accuracy: 0.5938
Epoch 30/50
37/37 [==============================] - 38s 1s/step - loss: 0.9430 - accuracy: 0.6068 - val_loss: 0.9560 - val_accuracy: 0.5938
Epoch 31/50
37/37 [==============================] - 36s 959ms/step - loss: 0.9432 - accuracy: 0.6068 - val_loss: 0.9566 - val_accuracy: 0.5938
Epoch 32/50
37/37 [==============================] - 36s 957ms/step - loss: 0.9438 - accuracy: 0.6068 - val_loss: 0.9581 - val_accuracy: 0.5938
Epoch 33/50
37/37 [==============================] - 36s 967ms/step - loss: 0.9446 - accuracy: 0.6068 - val_loss: 0.9572 - val_accuracy: 0.5938
Epoch 34/50
37/37 [==============================] - 37s 991ms/step - loss: 0.9435 - accuracy: 0.6068 - val_loss: 0.9578 - val_accuracy: 0.5938
Epoch 35/50
37/37 [==============================] - 38s 1s/step - loss: 0.9435 - accuracy: 0.6068 - val_loss: 0.9568 - val_accuracy: 0.5938
Epoch 36/50
37/37 [==============================] - 37s 991ms/step - loss: 0.9454 - accuracy: 0.6068 - val_loss: 0.9560 - val_accuracy: 0.5938
Epoch 37/50
37/37 [==============================] - 38s 1s/step - loss: 0.9432 - accuracy: 0.6068 - val_loss: 0.9563 - val_accuracy: 0.5938
Epoch 38/50
37/37 [==============================] - 36s 973ms/step - loss: 0.9435 - accuracy: 0.6068 - val_loss: 0.9574 - val_accuracy: 0.5938
Epoch 39/50
37/37 [==============================] - 36s 976ms/step - loss: 0.9431 - accuracy: 0.6068 - val_loss: 0.9564 - val_accuracy: 0.5938
Epoch 40/50
37/37 [==============================] - 37s 1s/step - loss: 0.9439 - accuracy: 0.6068 - val_loss: 0.9560 - val_accuracy: 0.5938
Epoch 41/50
37/37 [==============================] - 36s 970ms/step - loss: 0.9446 - accuracy: 0.6068 - val_loss: 0.9572 - val_accuracy: 0.5938
Epoch 42/50
37/37 [==============================] - 38s 1s/step - loss: 0.9439 - accuracy: 0.6068 - val_loss: 0.9565 - val_accuracy: 0.5938
Epoch 43/50
37/37 [==============================] - 38s 1s/step - loss: 0.9431 - accuracy: 0.6068 - val_loss: 0.9594 - val_accuracy: 0.5938
Epoch 44/50
37/37 [==============================] - 38s 1s/step - loss: 0.9459 - accuracy: 0.6068 - val_loss: 0.9580 - val_accuracy: 0.5938
Epoch 45/50
37/37 [==============================] - 38s 1s/step - loss: 0.9431 - accuracy: 0.6068 - val_loss: 0.9563 - val_accuracy: 0.5938
Epoch 46/50
37/37 [==============================] - 37s 996ms/step - loss: 0.9432 - accuracy: 0.6068 - val_loss: 0.9562 - val_accuracy: 0.5938
Epoch 47/50
37/37 [==============================] - 38s 1s/step - loss: 0.9435 - accuracy: 0.6068 - val_loss: 0.9562 - val_accuracy: 0.5938
Epoch 48/50
37/37 [==============================] - 38s 1s/step - loss: 0.9435 - accuracy: 0.6068 - val_loss: 0.9560 - val_accuracy: 0.5938
Epoch 49/50
37/37 [==============================] - 38s 1s/step - loss: 0.9450 - accuracy: 0.6068 - val_loss: 0.9566 - val_accuracy: 0.5938
Epoch 50/50
37/37 [==============================] - 38s 1s/step - loss: 0.9444 - accuracy: 0.6068 - val_loss: 0.9566 - val_accuracy: 0.5938
In [29]:
scores = model.evaluate(test_ds)
6/6 [==============================] - 3s 287ms/step - loss: 0.9242 - accuracy: 0.6250
In [30]:
history
Out[30]:
<keras.src.callbacks.History at 0x183b1f8ced0>
In [31]:
history.params
Out[31]:
{'verbose': 1, 'epochs': 50, 'steps': 37}
In [32]:
history.history.keys()
Out[32]:
dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])
In [33]:
type(history.history['loss'])
Out[33]:
list
In [34]:
len(history.history['loss'])
Out[34]:
50
In [35]:
history.history['loss'][:5] # show loss for first 5 epochs
Out[35]:
[0.969731330871582,
 0.9471847414970398,
 0.94986492395401,
 0.9568473100662231,
 0.9510318040847778]
In [36]:
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']
In [41]:
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(range(EPOCHS), acc, label='Training Accuracy')
plt.plot(range(EPOCHS), val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(range(EPOCHS), loss, label='Training Loss')
plt.plot(range(EPOCHS), val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
In [37]:
import numpy as np
for images_batch, labels_batch in test_ds.take(1):

    first_image = images_batch[0].numpy().astype('uint8')
    first_label = labels_batch[0].numpy()

    print("first image to predict")
    plt.imshow(first_image)
    print("actual label:", class_names[first_label])

    batch_prediction = model.predict(images_batch)
    print("predicted label:", class_names[np.argmax(batch_prediction[0])])
first image to predict
actual label: Train
1/1 [==============================] - 1s 871ms/step
predicted label: Train
In [38]:
def predict(model, img):
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)  # create a batch of one

    predictions = model.predict(img_array)

    predicted_class = class_names[np.argmax(predictions[0])]
    confidence = round(100 * (np.max(predictions[0])), 2)
    return predicted_class, confidence
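For a single image (reusing `first_image` from the earlier cell):

predicted_class, confidence = predict(model, first_image)
print(predicted_class, confidence)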
In [39]:
plt.figure(figsize=(15, 15))
for images, labels in test_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        
        predicted_class, confidence = predict(model, images[i].numpy())
        actual_class = class_names[labels[i]] 
        
        plt.title(f"Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%")
        
        plt.axis("off")
1/1 [==============================] - 0s 328ms/step
1/1 [==============================] - 0s 78ms/step
1/1 [==============================] - 0s 78ms/step
1/1 [==============================] - 0s 65ms/step
1/1 [==============================] - 0s 80ms/step
1/1 [==============================] - 0s 63ms/step
1/1 [==============================] - 0s 78ms/step
1/1 [==============================] - 0s 78ms/step
1/1 [==============================] - 0s 62ms/step
In [42]:
import os

model_directory = r"C:\be project\models"
existing_models = [f for f in os.listdir(model_directory) if f.startswith("model_") and f.endswith(".h5")]

# Extract version numbers and convert them to integers
model_versions = [int(f.split('_')[1].split('.')[0]) for f in existing_models]

# Next version is one past the current maximum (or 1 if none exist yet)
if model_versions:
    model_version = max(model_versions) + 1
else:
    model_version = 1

# Save the new model
model.save(os.path.join(model_directory, f"model_{model_version}.h5"))
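To verify the artifact, the saved model can be loaded back (a minimal sketch, reusing the path just written):

loaded = tf.keras.models.load_model(os.path.join(model_directory, f"model_{model_version}.h5"))
loaded.summary()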
In [44]:
model.save(r"C:\be project")
INFO:tensorflow:Assets written to: C:\be project\assets
In [2]:
import keras, os
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D , Flatten
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
WARNING:tensorflow:From C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\losses.py:2976: The name tf.losses.sparse_softmax_cross_entropy is deprecated. Please use tf.compat.v1.losses.sparse_softmax_cross_entropy instead.

In [3]:
train_data_dir = r'C:\be project\Potato\Train'
val_data_dir = r'C:\be project\Potato\Valid'
In [4]:
trdata = ImageDataGenerator()
# NOTE: both generators read the full "potato" root, so its three split folders
# become the "classes"; train_data_dir and val_data_dir above are never used.
traindata = trdata.flow_from_directory(directory="potato", target_size=(224,224))
tsdata = ImageDataGenerator()
testdata = tsdata.flow_from_directory(directory="potato", target_size=(224,224))
Found 1500 images belonging to 3 classes.
Found 1500 images belonging to 3 classes.
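A sketch of what the unused paths above suggest was intended, assuming (hypothetically) class subfolders inside each split folder:

traindata = trdata.flow_from_directory(directory=train_data_dir, target_size=(224, 224))
testdata = tsdata.flow_from_directory(directory=val_data_dir, target_size=(224, 224))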
In [1]:
def n_n():
    # unfinished helper stub; never called
    model = models.Sequential()
In [9]:
# VGG16-style convolutional base, built layer by layer
model = Sequential()
model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
WARNING:tensorflow:From C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\backend.py:873: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.

WARNING:tensorflow:From C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\layers\pooling\max_pooling2d.py:161: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.

In [11]:
model.add(Flatten())
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=4096,activation="relu"))
# NOTE: 2 output units here, but the generators above found 3 classes;
# this mismatch is what fails during fit below.
model.add(Dense(units=2, activation="softmax"))
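The summary below also shows this head stacked twice (`flatten_1`, `dense_3` through `dense_5`), which happens when the cell above is re-run on the same model object. A sketch of the head this 3-class data needs, assuming the model is rebuilt cleanly first:

model.add(Dense(units=3, activation="softmax"))  # matches the 3 classes found above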
In [12]:
from keras.optimizers import Adam
opt = Adam(learning_rate=0.001)  # 'lr' is a deprecated alias for learning_rate
model.compile(optimizer=opt, loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
In [13]:
model.summary()
Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d (Conv2D)             (None, 224, 224, 64)      1792      
                                                                 
 conv2d_1 (Conv2D)           (None, 224, 224, 64)      36928     
                                                                 
 max_pooling2d (MaxPooling2  (None, 112, 112, 64)      0         
 D)                                                              
                                                                 
 conv2d_2 (Conv2D)           (None, 112, 112, 128)     73856     
                                                                 
 conv2d_3 (Conv2D)           (None, 112, 112, 128)     147584    
                                                                 
 max_pooling2d_1 (MaxPoolin  (None, 56, 56, 128)       0         
 g2D)                                                            
                                                                 
 conv2d_4 (Conv2D)           (None, 56, 56, 256)       295168    
                                                                 
 conv2d_5 (Conv2D)           (None, 56, 56, 256)       590080    
                                                                 
 conv2d_6 (Conv2D)           (None, 56, 56, 256)       590080    
                                                                 
 max_pooling2d_2 (MaxPoolin  (None, 28, 28, 256)       0         
 g2D)                                                            
                                                                 
 conv2d_7 (Conv2D)           (None, 28, 28, 512)       1180160   
                                                                 
 conv2d_8 (Conv2D)           (None, 28, 28, 512)       2359808   
                                                                 
 conv2d_9 (Conv2D)           (None, 28, 28, 512)       2359808   
                                                                 
 max_pooling2d_3 (MaxPoolin  (None, 14, 14, 512)       0         
 g2D)                                                            
                                                                 
 conv2d_10 (Conv2D)          (None, 14, 14, 512)       2359808   
                                                                 
 conv2d_11 (Conv2D)          (None, 14, 14, 512)       2359808   
                                                                 
 conv2d_12 (Conv2D)          (None, 14, 14, 512)       2359808   
                                                                 
 max_pooling2d_4 (MaxPoolin  (None, 7, 7, 512)         0         
 g2D)                                                            
                                                                 
 flatten (Flatten)           (None, 25088)             0         
                                                                 
 dense (Dense)               (None, 4096)              102764544 
                                                                 
 dense_1 (Dense)             (None, 4096)              16781312  
                                                                 
 dense_2 (Dense)             (None, 2)                 8194      
                                                                 
 flatten_1 (Flatten)         (None, 2)                 0         
                                                                 
 dense_3 (Dense)             (None, 4096)              12288     
                                                                 
 dense_4 (Dense)             (None, 4096)              16781312  
                                                                 
 dense_5 (Dense)             (None, 2)                 8194      
                                                                 
=================================================================
Total params: 151070532 (576.29 MB)
Trainable params: 151070532 (576.29 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
In [14]:
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=20, verbose=1, mode='auto')
hist = model.fit_generator(steps_per_epoch=100,generator=traindata, validation_data= testdata, validation_steps=10,epochs=100,callbacks=[checkpoint,early])
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
C:\Users\aditi\AppData\Local\Temp\ipykernel_8228\1266381948.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  hist = model.fit_generator(steps_per_epoch=100,generator=traindata, validation_data= testdata, validation_steps=10,epochs=100,callbacks=[checkpoint,early])
Epoch 1/100
WARNING:tensorflow:From C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\utils\tf_utils.py:492: The name tf.ragged.RaggedTensorValue is deprecated. Please use tf.compat.v1.ragged.RaggedTensorValue instead.

WARNING:tensorflow:From C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\engine\base_layer_utils.py:384: The name tf.executing_eagerly_outside_functions is deprecated. Please use tf.compat.v1.executing_eagerly_outside_functions instead.

---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
Cell In[14], line 4
      2 checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
      3 early = EarlyStopping(monitor='val_acc', min_delta=0, patience=20, verbose=1, mode='auto')
----> 4 hist = model.fit_generator(steps_per_epoch=100,generator=traindata, validation_data= testdata, validation_steps=10,epochs=100,callbacks=[checkpoint,early])

File ~\anaconda3\Lib\site-packages\keras\src\engine\training.py:2913, in Model.fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   2901 """Fits the model on data yielded batch-by-batch by a Python generator.
   2902 
   2903 DEPRECATED:
   2904   `Model.fit` now supports generators, so there is no longer any need to
   2905   use this endpoint.
   2906 """
   2907 warnings.warn(
   2908     "`Model.fit_generator` is deprecated and "
   2909     "will be removed in a future version. "
   2910     "Please use `Model.fit`, which supports generators.",
   2911     stacklevel=2,
   2912 )
-> 2913 return self.fit(
   2914     generator,
   2915     steps_per_epoch=steps_per_epoch,
   2916     epochs=epochs,
   2917     verbose=verbose,
   2918     callbacks=callbacks,
   2919     validation_data=validation_data,
   2920     validation_steps=validation_steps,
   2921     validation_freq=validation_freq,
   2922     class_weight=class_weight,
   2923     max_queue_size=max_queue_size,
   2924     workers=workers,
   2925     use_multiprocessing=use_multiprocessing,
   2926     shuffle=shuffle,
   2927     initial_epoch=initial_epoch,
   2928 )

File ~\anaconda3\Lib\site-packages\keras\src\utils\traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
     67     filtered_tb = _process_traceback_frames(e.__traceback__)
     68     # To get the full stack trace, call:
     69     # `tf.debugging.disable_traceback_filtering()`
---> 70     raise e.with_traceback(filtered_tb) from None
     71 finally:
     72     del filtered_tb

File ~\anaconda3\Lib\site-packages\tensorflow\python\eager\execute.py:53, in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     51 try:
     52   ctx.ensure_initialized()
---> 53   tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
     54                                       inputs, attrs, num_outputs)
     55 except core._NotOkStatusException as e:
     56   if name is not None:

InvalidArgumentError: Graph execution error:

Detected at node categorical_crossentropy/softmax_cross_entropy_with_logits defined at (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main

  File "<frozen runpy>", line 88, in _run_code

  File "C:\Users\aditi\anaconda3\Lib\site-packages\ipykernel_launcher.py", line 17, in <module>

  File "C:\Users\aditi\anaconda3\Lib\site-packages\traitlets\config\application.py", line 992, in launch_instance

  File "C:\Users\aditi\anaconda3\Lib\site-packages\ipykernel\kernelapp.py", line 736, in start

  File "C:\Users\aditi\anaconda3\Lib\site-packages\tornado\platform\asyncio.py", line 195, in start

  File "C:\Users\aditi\anaconda3\Lib\asyncio\base_events.py", line 607, in run_forever

  File "C:\Users\aditi\anaconda3\Lib\asyncio\base_events.py", line 1922, in _run_once

  File "C:\Users\aditi\anaconda3\Lib\asyncio\events.py", line 80, in _run

  File "C:\Users\aditi\anaconda3\Lib\site-packages\ipykernel\kernelbase.py", line 516, in dispatch_queue

  File "C:\Users\aditi\anaconda3\Lib\site-packages\ipykernel\kernelbase.py", line 505, in process_one

  File "C:\Users\aditi\anaconda3\Lib\site-packages\ipykernel\kernelbase.py", line 412, in dispatch_shell

  File "C:\Users\aditi\anaconda3\Lib\site-packages\ipykernel\kernelbase.py", line 740, in execute_request

  File "C:\Users\aditi\anaconda3\Lib\site-packages\ipykernel\ipkernel.py", line 422, in do_execute

  File "C:\Users\aditi\anaconda3\Lib\site-packages\ipykernel\zmqshell.py", line 546, in run_cell

  File "C:\Users\aditi\anaconda3\Lib\site-packages\IPython\core\interactiveshell.py", line 3024, in run_cell

  File "C:\Users\aditi\anaconda3\Lib\site-packages\IPython\core\interactiveshell.py", line 3079, in _run_cell

  File "C:\Users\aditi\anaconda3\Lib\site-packages\IPython\core\async_helpers.py", line 129, in _pseudo_sync_runner

  File "C:\Users\aditi\anaconda3\Lib\site-packages\IPython\core\interactiveshell.py", line 3284, in run_cell_async

  File "C:\Users\aditi\anaconda3\Lib\site-packages\IPython\core\interactiveshell.py", line 3466, in run_ast_nodes

  File "C:\Users\aditi\anaconda3\Lib\site-packages\IPython\core\interactiveshell.py", line 3526, in run_code

  File "C:\Users\aditi\AppData\Local\Temp\ipykernel_8228\1266381948.py", line 4, in <module>

  File "C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\engine\training.py", line 2913, in fit_generator

  File "C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\utils\traceback_utils.py", line 65, in error_handler

  File "C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\engine\training.py", line 1807, in fit

  File "C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\engine\training.py", line 1401, in train_function

  File "C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\engine\training.py", line 1384, in step_function

  File "C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\engine\training.py", line 1373, in run_step

  File "C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\engine\training.py", line 1151, in train_step

  File "C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\engine\training.py", line 1209, in compute_loss

  File "C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\engine\compile_utils.py", line 277, in __call__

  File "C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\losses.py", line 143, in __call__

  File "C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\losses.py", line 270, in call

  File "C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\losses.py", line 2221, in categorical_crossentropy

  File "C:\Users\aditi\anaconda3\Lib\site-packages\keras\src\backend.py", line 5579, in categorical_crossentropy

logits and labels must be broadcastable: logits_size=[32,2] labels_size=[32,3]
	 [[{{node categorical_crossentropy/softmax_cross_entropy_with_logits}}]] [Op:__inference_train_function_4593]
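The failure is the class-count mismatch flagged above: the generator yields one-hot labels of width 3, while the final Dense layer outputs only 2 logits. A hedged sketch of a corrected training call, assuming the base is rebuilt with a single 3-unit softmax head, using `Model.fit` (which accepts generators directly) and the current metric name `val_accuracy`:

checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_accuracy', verbose=1,
                             save_best_only=True, mode='auto')
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')
hist = model.fit(traindata, steps_per_epoch=100, validation_data=testdata,
                 validation_steps=10, epochs=100, callbacks=[checkpoint, early])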
In [ ]: